SaifPunjwani committed on
Commit 0465544
1 Parent(s): c1686ae

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +10 -0
  2. keypoints_video/allocentric_2lfVFusH-lA.mp4 +3 -0
  3. keypoints_video/allocentric_2vwQyeV-LQ4.mp4 +3 -0
  4. keypoints_video/allocentric_MuRVOQY8KoY.mp4 +3 -0
  5. keypoints_video/allocentric_SCPpM9i7GPU.mp4 +3 -0
  6. keypoints_video/allocentric_p0A_IRKfG-w.mp4 +3 -0
  7. keypoints_video/allocentric_ppxK4R8XWfU.mp4 +3 -0
  8. keypoints_video/allocentric_vm9vMjOPr2k.mp4 +3 -0
  9. keypoints_video/allocentric_wW7Z52plM0s.mp4 +3 -0
  10. keypoints_video/allocentric_xF4GkHLiHJQ.mp4 +3 -0
  11. keypoints_video/makeup_OFxVtlKAu7Y.mp4 +3 -0
  12. transcript/allocentric_SCPpM9i7GPU.txt +0 -0
  13. transcript/allocentric_jgxNs1WBONk.txt +66 -0
  14. transcript/allocentric_mhVsMmcOxQM.txt +79 -0
  15. transcript/allocentric_p0A_IRKfG-w.txt +20 -0
  16. transcript/allocentric_rbItjWcSHbs.txt +16 -0
  17. transcript/allocentric_tBidCJnzE4g.txt +240 -0
  18. transcript/allocentric_uxBeSEughAc.txt +54 -0
  19. transcript/allocentric_wW7Z52plM0s.txt +47 -0
  20. transcript/allocentric_xPiRQ1G241k.txt +127 -0
  21. video/TED_-FOCpMAww28.f140.m4a +3 -0
  22. video/TED_-FOCpMAww28.f247.webm +3 -0
  23. video/TED_-FOCpMAww28.mp4 +3 -0
  24. video/TED_1zpf8H_Dd40.f140.m4a +3 -0
  25. video/TED_1zpf8H_Dd40.f616.mp4 +3 -0
  26. video/TED_1zpf8H_Dd40.mp4 +3 -0
  27. video/TED_4TQETLZZmcM.f140.m4a +3 -0
  28. video/TED_4TQETLZZmcM.f248.webm +3 -0
  29. video/TED_4TQETLZZmcM.mp4 +3 -0
  30. video/TED_4jwUXV4QaTw.f251.webm +3 -0
  31. video/TED_4jwUXV4QaTw.f616.mp4 +3 -0
  32. video/TED_4jwUXV4QaTw.mp4 +3 -0
  33. video/TED_79HMPQj55yc.f251.webm +3 -0
  34. video/TED_79HMPQj55yc.mp4 +3 -0
  35. video/TED_8S0FDjFBj8o.f251.webm +3 -0
  36. video/TED_8S0FDjFBj8o.mp4 +3 -0
  37. video/TED_E6NTM793zvo.f140.m4a +3 -0
  38. video/TED_E6NTM793zvo.f248.webm +3 -0
  39. video/TED_E6NTM793zvo.mp4 +3 -0
  40. video/TED_I5x1wQ6kHX0.f140.m4a +3 -0
  41. video/TED_I5x1wQ6kHX0.f616.mp4 +3 -0
  42. video/TED_I5x1wQ6kHX0.mp4 +3 -0
  43. video/TED_K0pxo-dS9Hc.f251.webm +3 -0
  44. video/TED_K0pxo-dS9Hc.mp4 +3 -0
  45. video/TED_Ks-_Mh1QhMc.f247.webm +3 -0
  46. video/TED_Ks-_Mh1QhMc.f251.webm +3 -0
  47. video/TED_Ks-_Mh1QhMc.mp4 +3 -0
  48. video/TED_L9UIF852Boo.f251.webm +3 -0
  49. video/TED_L9UIF852Boo.f616.mp4 +3 -0
  50. video/TED_OyK0oE5rwFY.f248.webm +3 -0
.gitattributes CHANGED
@@ -1339,3 +1339,13 @@ keypoints/allocentric_SCPpM9i7GPU.json filter=lfs diff=lfs merge=lfs -text
  keypoints/allocentric_Z550DeGoTgU.json filter=lfs diff=lfs merge=lfs -text
  keypoints/allocentric_2lfVFusH-lA.json filter=lfs diff=lfs merge=lfs -text
  keypoints/allocentric_qYYTOnevfrk.json filter=lfs diff=lfs merge=lfs -text
+ video/TED_E6NTM793zvo.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_P_6vDLq64gE.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_1zpf8H_Dd40.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_4TQETLZZmcM.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_I5x1wQ6kHX0.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_cef35Fk7YD8.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_nvaPzA50eQA.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_rSQNi5sAwuc.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/TED_-FOCpMAww28.f140.m4a filter=lfs diff=lfs merge=lfs -text
+ video/podcast_d8w9gn5yQQg.f303.webm.part filter=lfs diff=lfs merge=lfs -text
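The rules added above route each new media file through Git LFS (filter=lfs diff=lfs merge=lfs -text). The commit message names the upload-large-folder tool; as a hedged illustration only, an upload of this shape is typically driven from Python with a recent huggingface_hub client roughly as follows (repo_id, repo_type, and folder_path below are placeholders and assumptions, not values taken from this commit):

# Illustrative sketch, not the exact command used for this commit.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via a cached login token or the HF_TOKEN environment variable
api.upload_large_folder(
    repo_id="username/dataset-name",  # placeholder repository ID
    repo_type="dataset",              # assumption: the target repository is a dataset
    folder_path="./local_dataset",    # local folder holding video/, transcript/, keypoints_video/, ...
)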
keypoints_video/allocentric_2lfVFusH-lA.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fa77a43e9159772dcbf44a34b6b6a2da7edd6ca7b1aaa3fea2c7ffbc3820b75
+ size 2521629822
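Each of these video entries is committed as a Git LFS pointer rather than the binary itself: a version line, the object's SHA-256 oid, and its size in bytes. A minimal, illustrative reader for that three-line format (the helper name is ours, not part of any tooling shown in this commit):

def parse_lfs_pointer(text):
    """Parse a Git LFS pointer body into a {key: value} dict (keys: version, oid, size)."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # split on the first space only
        fields[key] = value
    return fields

# Example using the pointer contents shown above:
pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:7fa77a43e9159772dcbf44a34b6b6a2da7edd6ca7b1aaa3fea2c7ffbc3820b75\n"
    "size 2521629822\n"
)
assert pointer["size"] == "2521629822"  # sizes are stored as decimal byte counts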
keypoints_video/allocentric_2vwQyeV-LQ4.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18aee641d7f5d45452fdd5de91984a6f9436c3320edae68b4b971c0a107cf1c2
+ size 3906898500
keypoints_video/allocentric_MuRVOQY8KoY.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d825cd057780932575852fb93b508edb87ed50ee7f00b6cbc14d5a559c3849b
+ size 3265493982
keypoints_video/allocentric_SCPpM9i7GPU.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da69e8271292ec5bc30610f83086f8e56c60a8980c56fdcce1a8644128b9b2a7
+ size 1734370226
keypoints_video/allocentric_p0A_IRKfG-w.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae41e76b27f02dcee1d75bcf924121737137a3247290718b393f9571030be13b
+ size 42256144
keypoints_video/allocentric_ppxK4R8XWfU.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d948ebb0e47c00b1919cc3b848999d15daccd119d05508a9a5c566a18bfcac3
+ size 2752547484
keypoints_video/allocentric_vm9vMjOPr2k.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e52d574991e783622f6d21e102e3433534e2ba25dbf5de26365b3e3971839b76
+ size 4957708817
keypoints_video/allocentric_wW7Z52plM0s.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d64e06b2aacd697cec99cc6c5ef1438e6805a8c599af4b9944afc58bed2bbfdc
+ size 195634511
keypoints_video/allocentric_xF4GkHLiHJQ.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21f38ecc3014caceeece05c17d3c4dc119fe98e9ee3769836c0cfa48ea2cea4e
+ size 2707666751
keypoints_video/makeup_OFxVtlKAu7Y.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2669d3557b8caddeb1511c354849e213fa25583d2faf1ce0c822efc6ada22d2d
+ size 17291810449
transcript/allocentric_SCPpM9i7GPU.txt ADDED
The diff for this file is too large to render. See raw diff
 
transcript/allocentric_jgxNs1WBONk.txt ADDED
@@ -0,0 +1,66 @@
1
+ [0.000 --> 2.000] Bella, do's we built a party?
2
+ [2.000 --> 4.000] No, the mirror is math science.
3
+ [4.000 --> 6.000] History and rambling, the mystery
4
+ [6.000 --> 9.000] that all started with a big bang.
5
+ [9.000 --> 10.000] Hey.
6
+ [10.000 --> 11.000] Hey.
7
+ [11.000 --> 12.000] Hey.
8
+ [12.000 --> 13.000] Everything's smoothed out with Amy.
9
+ [13.000 --> 15.000] Uh, no, she's still pretty mad.
10
+ [15.000 --> 18.000] Did you make the apology as sincere as I would have?
11
+ [18.000 --> 21.000] I said, children, says he's sorry.
12
+ [21.000 --> 23.000] Well, I have to hang it on a little thick.
13
+ [26.000 --> 29.000] Hey, it's time you apologize to her yourself.
14
+ [29.000 --> 30.000] I suppose so.
15
+ [30.000 --> 32.000] But if I get out of line, I'll lose my spot.
16
+ [32.000 --> 35.000] I'm happy to hold your place till you get back.
17
+ [45.000 --> 46.000] Good morning, sir.
18
+ [46.000 --> 48.000] What can I get started for you today?
19
+ [48.000 --> 50.000] It's a pleasure, sir.
20
+ [50.000 --> 52.000] Uh-oh. What's nice?
21
+ [52.000 --> 53.000] All right.
22
+ [53.000 --> 55.000] Three empty glasses.
23
+ [55.000 --> 57.000] Would you care for a pastry?
24
+ [57.000 --> 58.000] Nope.
25
+ [60.000 --> 61.000] Uh, mocha.
26
+ [61.000 --> 63.000] Three mocha lattes.
27
+ [66.000 --> 68.000] Uh, double chocolate chip muffin.
28
+ [68.000 --> 69.000] Yup.
29
+ [73.000 --> 74.000] Ah, ah, ah, ah, ah.
30
+ [74.000 --> 76.000] You had three palm and cartonies in these.
31
+ [76.000 --> 77.000] Puzzle.
32
+ [77.000 --> 81.000] And I love the Beatles' state for my life.
33
+ [81.000 --> 82.000] Something.
34
+ [82.000 --> 84.000] Uh.
35
+ [84.000 --> 86.000] What's that, son?
36
+ [86.000 --> 88.000] Hey, bring him Lincoln.
37
+ [88.000 --> 89.000] Uh-huh.
38
+ [89.000 --> 92.000] Do you know you, me, gone?
39
+ [92.000 --> 93.000] Shoot.
40
+ [93.000 --> 94.000] Oh, yeah.
41
+ [94.000 --> 96.000] Three shots.
42
+ [96.000 --> 98.000] Uh.
43
+ [126.000 --> 128.000] Uh.
44
+ [157.000 --> 164.000] Is that everyone I do want to war?
45
+ [179.000 --> 181.000] Avengers!
46
+ [187.000 --> 188.000] No!
47
+ [203.000 --> 205.000] It's not much.
48
+ [205.000 --> 206.000] But it's home.
49
+ [206.000 --> 208.000] I think it's brilliant.
50
+ [210.000 --> 213.000] Where have you been?
51
+ [214.000 --> 217.000] Harry, how wonderful to see you, dear.
52
+ [217.000 --> 218.000] Bed's empty?
53
+ [218.000 --> 219.000] No note?
54
+ [219.000 --> 220.000] Car gone?
55
+ [220.000 --> 222.000] You got a dime.
56
+ [222.000 --> 224.000] You could have been seen.
57
+ [224.000 --> 227.000] Of course, I don't blame you, Harry, dear.
58
+ [227.000 --> 229.000] They were starving, in, mum.
59
+ [229.000 --> 231.000] They were bars in his window.
60
+ [231.000 --> 235.000] You best hope I don't put bars on your window, Donald Weasley.
61
+ [235.000 --> 236.000] Come on, Harry.
62
+ [236.000 --> 238.000] Time for spotted breakfast.
63
+ [244.000 --> 245.000] Oh.
64
+ [251.000 --> 252.000] Lincoln!
65
+ [274.000 --> 275.000] What?
66
+ [293.000 --> 294.000] Ah!
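The transcript files added in this commit store one segment per line in a Whisper-style "[start --> end] text" layout, as shown above. A small illustrative reader, assuming plain UTF-8 files in exactly this format (the function and example path are ours, not project code):

import re
from pathlib import Path

# Matches lines such as "[0.000 --> 2.000] Bella, do's we built a party?"
SEGMENT = re.compile(r"\[(\d+\.\d+) --> (\d+\.\d+)\]\s*(.*)")

def parse_transcript(path):
    """Yield (start_seconds, end_seconds, text) for each segment line in one transcript file."""
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        match = SEGMENT.match(line.strip())
        if match:
            start, end, text = match.groups()
            yield float(start), float(end), text

# Example (hypothetical local path):
# segments = list(parse_transcript("transcript/allocentric_jgxNs1WBONk.txt"))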
transcript/allocentric_mhVsMmcOxQM.txt ADDED
@@ -0,0 +1,79 @@
1
+ [0.000 --> 6.720] Welcome to the presentation of Where Should I Look?
2
+ [6.720 --> 9.640] Comparing reference frames for spatial tactile cues.
3
+ [9.640 --> 14.560] My name is Eric Pescara and my co-authors are Anton Stubenwad to be a Svartiger, Ikunt
4
+ [14.560 --> 18.000] Fang and Michal Beigel.
5
+ [18.000 --> 21.920] When designing tactile displays on the wrist for special cues, it is important to keep
6
+ [21.920 --> 24.240] the natural movement of the body and mind.
7
+ [24.240 --> 28.560] Depending on the movement of the wrist, different reference frames can influence the output
8
+ [28.560 --> 30.040] of the tactile display.
9
+ [30.040 --> 34.280] In this paper, we compare it in allocentric reference frame with the wrist centered reference
10
+ [34.280 --> 38.280] frame in terms of accuracy, reaction time and cognitive load.
11
+ [38.280 --> 41.640] We conducted a repeated measures user study with 20 participants.
12
+ [41.640 --> 46.520] We used a tactile wristband with 10 evenly spaced actuators as our tactile display.
13
+ [46.520 --> 51.680] The data we retrieved from the experiment consisted of 120 spatial localization tasks per
14
+ [51.680 --> 53.800] participant per reference frame.
15
+ [53.800 --> 57.800] As a measure of cognitive load, we asked the participants to fill out a raw TLX survey
16
+ [57.800 --> 59.400] after every condition.
17
+ [59.400 --> 62.600] A spatial localization task was conducted as follows.
18
+ [62.600 --> 68.560] First, a combination of wrist rotation and actuator was drawn from a pre-determined list.
19
+ [68.560 --> 73.400] The participant was then instructed to turn the wrist to match the given wrist rotation.
20
+ [73.400 --> 78.040] Depending on the reference frame, the corresponding spatial direction was calculated.
21
+ [78.040 --> 82.320] The selected actuator was then activated for one second.
22
+ [82.320 --> 85.120] The participant then was asked to input the direction.
23
+ [85.120 --> 89.040] The input was stored together with the reaction time in the true direction.
24
+ [89.040 --> 94.960] This process was then repeated until the list of spatial localization tasks was exhausted.
25
+ [94.960 --> 100.000] In the evaluation, we first looked if localization accuracy was influenced by the reference frame.
26
+ [100.000 --> 103.920] Both reference frames had a high localization accuracy for the wrist and showed no statistical
27
+ [103.920 --> 105.920] difference.
28
+ [105.920 --> 111.040] While the wrist centered reference frame had an accuracy of 84% and an average error of 28.5
29
+ [111.040 --> 115.760] degrees, the allocentric reference frame was only marginally better with an accuracy
30
+ [115.760 --> 121.040] of 85% and an average error of 26.8 degrees.
31
+ [121.040 --> 125.960] When comparing reaction time, we can see a difference between the reference frames.
32
+ [125.960 --> 131.280] There is a statistically significant difference in reaction time with a small effect size.
33
+ [131.280 --> 136.200] The allocentric reference frame has a slightly faster reaction time with an average difference
34
+ [136.200 --> 140.040] of 240 milliseconds.
35
+ [140.040 --> 144.880] For measuring the effects of the wrist rotation on the participant's reaction time, we performed
36
+ [144.880 --> 151.200] a linear regression analysis with a wrist rotation as predictor on the reaction time.
37
+ [151.200 --> 155.840] While there is a clear relation between increasing reaction time and higher wrist rotation for
38
+ [155.840 --> 161.560] the wrist centered reference frame, the allocentric reference frame is mostly unaffected by the wrist
39
+ [161.560 --> 164.240] rotation.
40
+ [164.240 --> 168.760] For measuring the effects of the wrist rotation on the participant's accuracy, we also performed
41
+ [168.760 --> 174.120] a linear regression analysis with the wrist rotation as predictor of the accuracy.
42
+ [174.120 --> 178.960] While there is a clear relation between decreasing accuracy and higher wrist rotation for the
43
+ [178.960 --> 183.940] wrist centered reference frame, the allocentric reference frame is mostly unaffected by the
44
+ [183.940 --> 186.680] wrist rotation.
45
+ [186.680 --> 191.600] We looked at the data collected by the RRTLX questionnaires to analyze how the participant's
46
+ [191.600 --> 194.800] mental demand was affected under both conditions.
47
+ [194.800 --> 199.160] The RRTLX data was separately evaluated for each dimension.
48
+ [199.160 --> 203.600] The allocentric reference frame yielded better results in every dimension.
49
+ [203.600 --> 208.960] There was a statistically significant difference between the reference frames in all dimensions
50
+ [208.960 --> 212.440] with most dimensions only having a low effect size.
51
+ [212.440 --> 217.840] However, we found that the mental demand dimension had a large effect size and the performance
52
+ [217.840 --> 222.080] in effort dimensions had a moderate effect size.
53
+ [222.080 --> 226.800] The participants also described the wrist centered reference frame as far less intuitive
54
+ [226.800 --> 228.440] and more demanding.
55
+ [228.440 --> 233.480] This leads us to conclude that the mental demand of the allocentric reference frame is lower
56
+ [233.480 --> 237.080] than the wrist centered reference frame.
57
+ [237.080 --> 241.160] Linear regression was used to measure the learning effect during the experiment for both
58
+ [241.160 --> 245.560] conditions using trial number as predictor of our reaction time.
59
+ [245.560 --> 250.080] Most statistically significant evidence was found that the trial number influenced the
60
+ [250.080 --> 253.960] reaction time for the wrist centered reference frame.
61
+ [253.960 --> 258.520] We found statistically significant evidence that the trial number influenced the reaction
62
+ [258.520 --> 260.920] time for the allocentric condition.
63
+ [260.920 --> 265.000] Our participants improved the reaction time in the allocentric reference frame during
64
+ [265.000 --> 267.320] the experiment.
65
+ [267.320 --> 272.680] In conclusion, in the experiment we conducted we found evidence that an allocentric reference
66
+ [272.680 --> 277.200] frame outperforms a wrist centered reference frame for spatial localization tasks with a
67
+ [277.200 --> 279.800] wrist-one tactile variable.
68
+ [279.800 --> 284.000] In our experiment the participants had faster reaction times with the allocentric reference
69
+ [284.000 --> 287.480] frame and improved during the experiment.
70
+ [287.480 --> 292.000] We found evidence that the allocentric reference frame was mentally less demanding and more
71
+ [292.000 --> 294.000] intuitive.
72
+ [294.000 --> 299.240] And the allocentric reference frame was more robust against postural changes compared to
73
+ [299.240 --> 302.360] the wrist-centered reference frame.
74
+ [302.360 --> 307.160] In the future we would like to investigate the influence of different reference frames
75
+ [307.240 --> 313.000] for spatial localization tasks in the wild with a broad range of activities.
76
+ [313.000 --> 317.680] We would also like to include more than just wrist rotations in the postural changes.
77
+ [317.680 --> 322.240] And as there is conflicting evidence in the literature which reference frames perform
78
+ [322.240 --> 329.280] better in which circumstances we would like to investigate this further.
79
+ [329.280 --> 331.160] Thank you for your attention and have a great day.
transcript/allocentric_p0A_IRKfG-w.txt ADDED
@@ -0,0 +1,20 @@
1
+ [0.000 --> 2.400] Many people believe you should never say,
2
+ [2.400 --> 4.580] I don't know, to a question.
3
+ [4.580 --> 6.620] Let's say at the end of a presentation.
4
+ [6.620 --> 9.740] Big picture, it's 100% acceptable.
5
+ [9.740 --> 13.180] If you don't know the answer, don't try to BS them.
6
+ [13.180 --> 15.860] They will smell blood in the water.
7
+ [15.860 --> 17.980] What matters is how you say it.
8
+ [17.980 --> 21.320] I've seen ultra-confident speakers handle this
9
+ [21.320 --> 22.940] in one of three ways.
10
+ [22.940 --> 24.860] First, you can say, I don't know,
11
+ [24.860 --> 27.260] but I'll look into it and get back to you
12
+ [27.260 --> 28.860] and make sure you follow up.
13
+ [28.920 --> 31.660] Second, I don't know, but I recommend you ask,
14
+ [31.660 --> 33.180] John, that's his area.
15
+ [33.180 --> 34.720] And make sure you refer to somebody
16
+ [34.720 --> 37.580] who really is the best person to ask.
17
+ [37.580 --> 40.620] Third, tell them what you do know instead.
18
+ [40.620 --> 44.660] Say, I don't know about X, but here's what I can tell you.
19
+ [44.660 --> 46.180] Make sure whatever you say next
20
+ [46.180 --> 48.580] adds a genuine value to the conversation.
transcript/allocentric_rbItjWcSHbs.txt ADDED
@@ -0,0 +1,16 @@
1
+ [0.000 --> 2.000] Egocentric
2
+ [2.000 --> 4.000] Adjective
3
+ [4.000 --> 7.000] Selfish, self-centered
4
+ [7.000 --> 11.000] Egocentric
5
+ [11.000 --> 13.000] Adjective
6
+ [13.000 --> 15.000] Egotistical
7
+ [15.000 --> 19.000] Egocentric
8
+ [19.000 --> 22.000] Adjective
9
+ [22.000 --> 28.240] Relating to spatial representations, linked to a reference frame based on one's own location
10
+ [28.240 --> 32.720] within the environment, as when giving the direction as right, rather than
11
+ [32.720 --> 38.240] north, opposed to alo-centric.
12
+ [38.240 --> 40.240] Egocentric
13
+ [40.240 --> 42.240] Noun
14
+ [42.240 --> 45.240] A person who is Egocentric
15
+ [45.240 --> 56.240] == References ==
16
+ [56.240 --> 58.400] Please support us with your subscription
transcript/allocentric_tBidCJnzE4g.txt ADDED
@@ -0,0 +1,240 @@
1
+ [0.000 --> 7.060] Hello friends, my name is Jessica and I am an ASHA certified speech and language
2
+ [7.060 --> 12.540] pathologist and I am obsessed with learning about all things social
3
+ [12.540 --> 17.140] communication. I am also obsessed with teaching others about them which is why
4
+ [17.140 --> 23.660] we're here on YouTube. Yay! So today I want to talk to you about non-verbal
5
+ [23.660 --> 30.860] communication. Nonverbal communication can be really tricky to teach. Most of us
6
+ [30.860 --> 36.900] have learned these skills naturally so identifying what they are and how to
7
+ [36.900 --> 40.540] break them down and teach them in a strategic way can actually be quite
8
+ [40.540 --> 45.680] challenging. So today we are going to talk about some ways that we can teach
9
+ [45.680 --> 53.620] our students about non-verbal communication and non-verbal language.
10
+ [54.660 --> 59.460] Now first I'm going to talk to you a little bit about non-verbal
11
+ [59.460 --> 65.740] communication. There are seven or eight-ish types of non-verbal communication.
12
+ [65.740 --> 78.800] They are facial expressions, body language, gestures, tone of voice, touch, eye
13
+ [78.800 --> 84.800] contact and personal space. Okay, you like those gestures? I just made them up on
14
+ [84.800 --> 90.120] the fly. Alright, so those are the seven areas of nonverbal communication. I
15
+ [90.120 --> 95.320] said eight because personal hygiene or personal appearance, sorry, can also be
16
+ [95.320 --> 100.560] considered a type of nonverbal communication. How we are choosing to appear
17
+ [100.560 --> 105.720] physically does communicate a lot about us. Okay, so let's break this down a
18
+ [105.720 --> 110.200] minute and now you know the different kinds of nonverbal communication. Let's
19
+ [110.200 --> 116.100] talk about what nonverbal communication is. It is any kind of communication that
20
+ [116.100 --> 122.680] occurs without words. It is not verbal, right? See how that works? So like I said,
21
+ [122.680 --> 127.840] it includes the following areas, facial expressions, body language, gestures, tone
22
+ [127.840 --> 133.000] of voice, touch, eye contact, personal space and physical appearance if you
23
+ [133.000 --> 139.800] want to click that. It is very common for individuals to struggle with nonverbal
24
+ [139.800 --> 145.000] communication. If your child has been diagnosed with autism, that means or your
25
+ [145.000 --> 149.600] student, that means that they have some trouble understanding and using nonverbal
26
+ [149.600 --> 155.080] communication. So the skill is extremely important to teach and learn because
27
+ [155.080 --> 160.800] learning how to understand nonverbal communication helps us to interact
28
+ [160.800 --> 166.840] socially with others and it helps us to communicate better with others. Okay, so
29
+ [166.840 --> 173.760] now we're going to break down each of the seven or eight sections of nonverbal
30
+ [173.760 --> 176.720] communication. We're going to break them down. We're going to talk about them.
31
+ [176.720 --> 180.680] We're going to define them and I'm going to give you some ideas on how you can
32
+ [180.680 --> 185.440] teach your student to understand each of these different kinds of nonverbal
33
+ [185.440 --> 189.920] communication. So the first one we're going to talk about is facial expressions. I
34
+ [189.920 --> 193.720] am looking off my computer if you keep seeing my eyes start away. I just full
35
+ [193.720 --> 199.000] disclosure. I need my notes because I want this to be good and helpful for you and
36
+ [199.000 --> 206.360] I can't do that if I'm doing this all my memory. So I hope you understand. Also,
37
+ [206.360 --> 211.080] before we dive in and I go any further, thank you for being here. Thank you for
38
+ [211.080 --> 215.400] taking the time to learn something new that will hopefully help you teach your
39
+ [215.400 --> 220.640] students a skill that they're struggling with. That is just absolutely awesome
40
+ [220.640 --> 224.520] and amazing and I thank you for choosing to spend your time with me. So before I
41
+ [224.520 --> 227.880] go any further, if you've not already gotten something to take notes with, I
42
+ [227.880 --> 233.240] would really recommend getting some pen and some paper and jotting down some
43
+ [233.240 --> 236.480] things to help you remember what we're talking about. Okay, so let's jump in.
44
+ [236.480 --> 240.920] The first type of nonverbal communication we're going to talk about is facial
45
+ [240.920 --> 252.000] expressions. Right? There are six main facial expressions that people make. Now,
46
+ [252.000 --> 260.360] each facial expression is related to an emotion. Each type of facial expression
47
+ [260.360 --> 266.280] has a very specific characteristics that will help you know what emotion the
48
+ [266.280 --> 271.680] person is feeling. Okay, so let's think about this. We're going to break it down a
49
+ [271.680 --> 278.600] little bit more. There are six types of facial expressions. Happy, sad, angry,
50
+ [278.600 --> 285.440] fear, disgust, and surprised. Scientists tell us that these are the basic
51
+ [285.440 --> 294.000] emotions that we all experience. Every other emotion is a root or a different
52
+ [294.000 --> 300.960] form of these basic emotions. So our facial expressions, we can say we're happy.
53
+ [300.960 --> 315.360] Sad, angry, scared, disgusted. We're surprised. Okay, each of these six basic
54
+ [315.360 --> 321.960] emotions have very distinct characteristics of the face. Okay, so going back
55
+ [321.960 --> 330.040] through them. When you feel happy, you have a wide smile and open now. You can
56
+ [330.040 --> 335.880] see that some teeth. You can see wrinkles around the eyes and the cheeks are
57
+ [335.880 --> 343.840] raised and the eyes are slightly squinted. Did you see all this in my face?
58
+ [343.840 --> 349.680] Can you see them? Can you see my wrinkles? My high-raised cheeks? My teeth? My smile?
59
+ [349.680 --> 357.480] I'm happy. I'm happy to be here. So that is happy. Second facial expression that
60
+ [357.480 --> 364.400] we can see is sad. Okay, the corners of the lips pointed down.
61
+ [364.400 --> 375.680] Inner eyebrows are raised up. My eyebrows apparently don't move like that. But you
62
+ [375.680 --> 383.400] know it's a face when you see one. Okay, next. Angry. There's tension in the face.
63
+ [383.400 --> 390.480] There's these closed, V-shaped eyebrows. The mouth, if the mouth is open, it's
64
+ [390.480 --> 396.120] square shaped. Square shaped, if you can understand that. If the mouth is closed,
65
+ [396.120 --> 403.200] there's tightness in the chin and jaws. Do you see all those characteristics?
66
+ [403.440 --> 415.440] Alright, fear. Slanted and raised eyebrows. Eyes are open very wide. Just saw a bug
67
+ [415.440 --> 420.660] crawl across my table. I don't know. Right? You know what fear looks like.
68
+ [420.660 --> 428.360] Disgust. A wrinkled nose. Lowered eyebrows. The mouth is tight and curved downward in the upper
69
+ [428.360 --> 436.520] lips go up. Big one across my table. I bug really didn't go across my table just
70
+ [436.520 --> 442.760] now. I'm just using that as an example. And last is surprised. Raised and curved
71
+ [442.760 --> 450.120] eyebrows. Horizontal wrinkles on the forehead. Open lips. Dropped jaw. Eyes open wide.
72
+ [450.840 --> 458.440] You see my wrinkles? Horizontal? Eyes. Mouth. I have a surprised face. So like I said,
73
+ [459.400 --> 466.280] I start by teaching my students these physical characteristics to look for when trying to
74
+ [466.280 --> 475.080] interpret a facial expression. Now an interesting tip is students with autism. We know that they struggle
75
+ [475.160 --> 481.080] with eye contact. So part of the reason that they struggle with understanding emotions is because
76
+ [481.080 --> 488.040] they are focusing on this lower third of the face. So a lot of these cues, like we talked about,
77
+ [488.040 --> 494.760] the horizontal wrinkles. This wrinkles around my eyes. Those are occurring in the top third of my face.
78
+ [494.760 --> 501.400] So a child or individual who does not focus on this top third is missing a lot of cues that's
79
+ [501.400 --> 508.520] going to help them learn to read and understand facial expressions. So to teach facial expressions,
80
+ [508.520 --> 515.400] to teach my students how to understand them, we again, I go over each of those definitions,
81
+ [515.400 --> 522.520] model them like I did for you. And they try and draw them so that they're having, you know,
82
+ [522.520 --> 527.640] an interactive process. And then we'll probably look at maybe some video clips or some pictures
83
+ [527.640 --> 535.160] or things to talk about those basic facial expressions. Again, really focus on this top third of
84
+ [535.160 --> 540.840] the face because we're getting a lot of cues there that if a child is not looking at somebody's
85
+ [540.840 --> 545.960] eyes or their top third of the face, they're going to miss those cues. Now we know there are more
86
+ [545.960 --> 554.040] emotions beyond happy, sad, mad, disgust, surprised, and angry. But we talk about these different
87
+ [554.040 --> 560.680] more complex emotions and how the same facial expressions are generally going to be used to convey
88
+ [560.680 --> 567.800] those complex emotions. So what we will do is we will, I'll give them a list of say, of some words.
89
+ [567.800 --> 579.640] For example, nervous, satisfied, amused, annoyed, love, revulsion. We're going to target, first of
90
+ [579.640 --> 585.080] all, some really good vocabulary words. But we're going to talk about what basic emotion,
91
+ [585.080 --> 591.560] these complex emotions are the most like. And then talk about kind of how the face is going to
92
+ [592.280 --> 600.280] model those similar facial expressions for the complex emotions as they do the basic emotions.
93
+ [601.160 --> 608.120] All right, gestures. Gestures are movements that we make with our bodies that are used to
94
+ [608.120 --> 614.840] communicate a message. We most frequently use our hands to gesture, but other parts of the bodies
95
+ [614.840 --> 622.920] can be used as well. Now, there are three types of gestures. Gestures that indicate nervousness,
96
+ [622.920 --> 627.640] such as fidgeting with objects, or my personal is playing with my hair.
97
+ [629.640 --> 634.520] Gestures with a specific meaning, such as a thumbs up, we know that means good job, it has a
98
+ [634.520 --> 640.200] meaning, and gestures that go with a verbal message, such as me, using my hands as I'm talking
99
+ [640.200 --> 647.320] and telling you a story. So when I'm teaching these, I focus mostly on teaching gestures with a
100
+ [647.320 --> 654.920] specific meaning. Think of these like gestures as a vocabulary word. We will talk about different
101
+ [654.920 --> 660.600] kinds of gestures, and then we will define it. For example, we will talk about the beckoning gesture,
102
+ [661.320 --> 666.680] and we will talk about what it means. It means come here. We will talk about the talking gesture.
103
+ [669.000 --> 674.200] What does that mean? She is talking and she needs to be quiet and tired of it, or we will talk about
104
+ [674.200 --> 681.160] a thump, or we will talk about the hitchhiking thumb. How that is different than a thumbs up.
105
+ [682.600 --> 687.880] Gestures like vocabulary words, we teach gestures, and I teach their meanings so that my students
106
+ [687.880 --> 692.680] are able to see someone using a gesture and define it like they would a vocabulary word.
107
+ [694.840 --> 699.800] In my non-verbal communication teaching guide, I have a whole list of different gestures
108
+ [701.480 --> 707.000] that you can use to know some gestures to teach. You can also find lists on the internet
109
+ [707.000 --> 713.640] if you're wanting to kind of DIY it yourself. All right, move and write along to touch. I believe that was
110
+ [713.640 --> 721.400] my gesture I used in the beginning. Touching someone is when you physically touch another person.
111
+ [722.360 --> 729.160] There are four different kinds of touching. There's actually five, but one of them is inappropriate,
112
+ [729.160 --> 737.720] so we're not going to talk about it here. There are four different kinds of touch. Functional,
113
+ [737.720 --> 746.680] professional, social polite, friendship, warmth, and love intimacy. Okay, let's talk about what
114
+ [746.680 --> 753.720] each of these are. A functional professional touch is the touching that occurs when a professional
115
+ [753.720 --> 759.400] must touch you to do his or her job. For example, the dentist has to touch your mouth. The hair lady
116
+ [759.400 --> 766.360] has to touch my hair. It's professional. I'm expecting her to touch me and she's doing it to do her job.
117
+ [767.320 --> 774.520] Second one is social polite, and this is touching that occurs in social routines. They're usually very
118
+ [774.520 --> 783.880] brief and they, let's see, sorry, I lost my spot. And they include things like a handshake, a pat on
119
+ [783.880 --> 788.680] the back, or a quick side hug. They're not going to last very long. We're just being polite. I'm
120
+ [788.680 --> 794.120] going to shake your hand and then we're done touching. Number three is friendship or warmth,
121
+ [794.120 --> 797.960] and this is touching that occurs between two people who are familiar with one another.
122
+ [799.800 --> 805.880] Now, when you teach this, or you know, you need to be very careful because this type of touch can
123
+ [805.880 --> 811.880] easily be confused with the next type, which is love intimacy. So you need to make sure that your
124
+ [811.880 --> 817.880] level of touch in this stage matches your partner so that you don't make that other person uncomfortable,
125
+ [817.880 --> 823.160] or you need to teach your student to make sure their level of touch matches their partners so they
126
+ [823.160 --> 830.120] don't make somebody uncomfortable. So friendship, warmth touching includes things like longer hugs,
127
+ [830.120 --> 836.280] putting your arms around the shoulders of somebody, or you know, holding hands. Well, holding
128
+ [836.280 --> 842.040] hands can also be in love intimacy. So if you're a good friend, you might give them a longer hug,
129
+ [842.040 --> 848.120] but if I hug you it's too long. Now I'm thinking, well, are we friends? Or is this like you being
130
+ [848.120 --> 856.600] intimate with me? So it's kind of that in between a social polite and intimacy. So the fourth one
131
+ [856.600 --> 864.280] is love intimacy, and this occurs between two people who are very close. This includes family,
132
+ [864.280 --> 872.280] very close friends, and significant others. You need to teach your students to be very careful to
133
+ [872.280 --> 877.880] use these touches with the appropriate people. Holding hands and touching someone's hair and
134
+ [877.880 --> 884.040] cuddling are all examples of love intimacy touching. So to teach this kind of nonverbal communication
135
+ [884.040 --> 891.480] touch, we just make a graph, and we talk about different, you know, I label, I make four different
136
+ [891.480 --> 896.840] squares. One is functional professional, one's social polite, friendship warmth and love intimacy,
137
+ [896.840 --> 902.200] and we make a list of the people who I would expect a functional professional touch with,
138
+ [902.200 --> 908.360] who I could expect a love intimacy touch with, who would be a good person to use friendship warmth
139
+ [908.360 --> 913.640] touch with, who should I use a social polite touch with. So we just sort people that we know into
140
+ [913.640 --> 921.800] the different categories of appropriate ways to touch them. Okay, next nonverbal communication
141
+ [921.800 --> 932.440] is proximics, aka personal space. So if somebody is too close to you, they're in your personal space,
142
+ [932.440 --> 939.400] and that's a type of nonverbal communication. Now, there are different kinds. There is a
143
+ [942.440 --> 950.600] scientific formula for what is appropriate as far as proximate goes. So proximics commonly called
144
+ [950.600 --> 957.640] personal space is the distance between you and another person. There are four levels intimate space,
145
+ [959.160 --> 965.240] personal space, social space, and public space. So we'll start from the middle and we'll work our
146
+ [965.240 --> 974.040] way out. Intimate space is anything that goes from touching your body to about 18 inches from
147
+ [974.040 --> 980.040] yourself. This is the space where you allow people with whom you are very close. So this could be
148
+ [980.040 --> 985.880] very close family members, very close friends, and significant others are probably the only people
149
+ [985.880 --> 993.880] you will allow in this space. Personal space is about 18 inches to four feet from your body. We will
150
+ [993.880 --> 1001.160] often allow friends and people we like in this space. Moving out again, we have social space. This
151
+ [1001.160 --> 1007.160] is four to ten feet from your body. This space is for people we don't know well, or for people with
152
+ [1007.160 --> 1014.440] whom we have no close relationship. Then last, the biggest ring is public space, which is what it
153
+ [1014.440 --> 1020.360] sounds like. Anything beyond ten feet from your body, where the rest of the public is, it contains
154
+ [1020.360 --> 1026.200] strangers and people who are not comfortable with. So this is important because it lets us know how
155
+ [1026.200 --> 1032.200] close it's appropriate to be to other people. And like I said, if somebody gets too close to me,
156
+ [1032.200 --> 1037.080] that makes me really uncomfortable if you're not one of my intimate people. At the same time,
157
+ [1037.640 --> 1043.000] if you're way out here in public space, but I think we're buds, that feels a little off to me too.
158
+ [1043.000 --> 1049.480] So to teach this, while I teach my students about these definitions, and then I like to get like
159
+ [1049.480 --> 1056.840] masking tape, and we measure this out on the ground to give an idea of what these spaces look like
160
+ [1056.840 --> 1063.720] visually. And then we'll do kind of that same activity that we did before, where we'll get the
161
+ [1063.720 --> 1069.960] four squares. And we will say, who are some people that I would allow in my intimate space?
162
+ [1070.600 --> 1075.320] Who are some people I would allow in my personal space? Who are people that might be in my social
163
+ [1075.320 --> 1080.120] space? And who are some people who would be in my public space? And we just think about
164
+ [1081.720 --> 1087.080] our space and our personal space and how we're sharing it and where people should be within that space.
165
+ [1087.240 --> 1096.280] Okay, the next type of nonverbal communication is whole body language. Our body language is the
166
+ [1096.280 --> 1102.360] way we position our head and our body to send a message to the people around us. When we tilt our
167
+ [1102.360 --> 1107.400] head to the side, it means I'm interested in what you're saying to me. If we lower our head, it
168
+ [1107.400 --> 1112.360] means we're probably unhappy. If our head stays upright, it means we're happy and we're in a good
169
+ [1112.360 --> 1119.160] mood or we're in a neutral mood that is neither happy nor unhappy. If we lean our body towards someone,
170
+ [1119.160 --> 1123.720] it means we're interested in what they have to say. And if we pull our body away from them,
171
+ [1123.720 --> 1127.000] it means we're not comfortable speaking to that person or that maybe we don't like them.
172
+ [1127.880 --> 1134.440] If you sit with an open posture like I am now, then it comes across as very welcoming and friendly.
173
+ [1135.080 --> 1141.400] If you close yourself up and you sit in the closed posture, then that is closed off. It's not as
174
+ [1141.400 --> 1148.680] welcoming and it doesn't look as friendly. So body language is also usually used with gestures
175
+ [1148.680 --> 1154.280] and facial expressions and tone of voice, all kind of combined together to give you a clue
176
+ [1154.280 --> 1161.320] as to what the other person is thinking. So to teach this skill, I will use video clips or maybe
177
+ [1161.320 --> 1169.960] look at pictures from a book and I will not do the audio if I can. Like if it's some of the
178
+ [1169.960 --> 1174.360] Disney shorts are really good for not having audio and you can just look at the body language.
179
+ [1176.680 --> 1180.440] But we'll look at the picture or the video clip and we'll describe the body language of the
180
+ [1180.440 --> 1185.400] person that's in it. And then we'll talk about what do we think that body language is communicating.
181
+ [1186.600 --> 1188.040] And we'll do that three or four times.
182
+ [1190.760 --> 1197.720] Okay, the next type of nonverbal communication is vocalix, which we commonly refer to as tone of voice.
183
+ [1199.960 --> 1208.280] This is how we use our voice, not our words, but our voice to convey a message. So think of the tone
184
+ [1208.280 --> 1214.280] of voice as the background to your words. Your tone of voice is going to change a lot based on
185
+ [1214.280 --> 1219.080] different situations. For example, you would use a different tone of voice at a football game
186
+ [1219.080 --> 1224.520] than you would in a nice restaurant. Your voice might also sound different in different context
187
+ [1224.520 --> 1229.560] when your emotions are changing. For example, your voice sounds different when you're nervous
188
+ [1229.560 --> 1235.720] versus in a situation where you're comfortable. And it's important to consider the context of each
189
+ [1235.720 --> 1244.440] situation when trying to understand the meaning of someone's voice. Vocal expression is also usually
190
+ [1244.440 --> 1250.760] tied to facial expressions. They go hand in hand. So this means if somebody's face looks sad,
191
+ [1250.760 --> 1257.320] their voice probably sounds sad too. So what I tell my students is if they have a hard time
192
+ [1257.320 --> 1262.040] understanding the tone of voice to also pay attention to the facial expressions and the body
193
+ [1262.040 --> 1270.600] language, to give them clues as to how the other person is feeling. Okay, so to teach vocalix or tone
194
+ [1270.600 --> 1277.880] of voice, what I will do is I will give my students a context and a facial expression and words.
195
+ [1277.880 --> 1285.000] And then they will practice using different tones of voice to say that word. So for example,
196
+ [1285.880 --> 1290.600] the context could be your brother or sister borrowed your shirt and gave it back with a stain on it.
197
+ [1290.600 --> 1295.000] The facial expression would be angry and the words would be thank you. Thank you.
198
+ [1297.000 --> 1301.480] Same words, thank you. This time your mom gave you broccoli for dinner. You hate broccoli.
199
+ [1302.920 --> 1309.560] Thank you. Same words again. Thank you. Your dad surprised you with a new phone. Thank you.
200
+ [1309.800 --> 1316.120] Thank you again. Someone hands you a tissue after you've been crying. Thank you.
201
+ [1316.920 --> 1323.240] So this talks about how different situations and different scenarios are going to sound different
202
+ [1323.240 --> 1327.960] with different tone of voice even though the words might be exactly this. Okay, now I would be
203
+ [1327.960 --> 1333.320] remiss to not talk about eye contact when we're talking about types of nonverbal communication.
204
+ [1333.560 --> 1339.640] Okay, understanding eye contact will help our students become better nonverbal communicators.
205
+ [1340.520 --> 1345.400] Remember how I talked about most of our students with autism focus on the lower two thirds of the
206
+ [1345.400 --> 1353.080] face. A lot is going on in these upper and this upper third. So teaching them why eye contact
207
+ [1353.080 --> 1358.520] is important or at least why looking at this upper area is important is going to help them become
208
+ [1358.600 --> 1364.440] a better nonverbal communicator. It also helps with connection and helps us to connect with others
209
+ [1364.440 --> 1369.640] and feel closer and it helps others feel closer to us. So I explain all of those things when I'm talking
210
+ [1369.640 --> 1376.200] about eye contact. The last one that we talked about is physical appearance. I again just kind of
211
+ [1376.200 --> 1381.560] briefly touch on this. I explain what physical appearance is and how you know sometimes some
212
+ [1381.560 --> 1386.840] things in your physical appearance you can change and some things you can't. So we talk about how you
213
+ [1387.080 --> 1394.760] know when you change your hair color or well okay some things like your height and your weight
214
+ [1394.760 --> 1399.640] and your natural hair color are things you cannot change. But you can change things like how you
215
+ [1399.640 --> 1404.440] dress and the accessories, how you groom yourself if you wash your hair if you cut your nails
216
+ [1405.000 --> 1409.720] that affects what people think about you. So if I come in and my hair is clean and my nails are
217
+ [1409.720 --> 1415.640] done people are going to think I'm a clean person. If I come in and I haven't washed my hair in a
218
+ [1415.720 --> 1421.720] week and my nails are long and dirty that's going to affect how people think of me. Also how you
219
+ [1421.720 --> 1427.640] know we pick our clothes based on the type of image we want to portray. I you know I'm trying to
220
+ [1427.640 --> 1433.000] choose something professional looking as I'm talking to you and I'm not wearing my workout clothes
221
+ [1433.000 --> 1437.640] that I usually wear all day long because I want you to think of me as a professional and somebody
222
+ [1437.640 --> 1443.000] who knows what I'm talking about. So physical appearance is a type of nonverbal communication.
223
+ [1443.000 --> 1449.320] So I hope you learned some new things about nonverbal communication. I hope you have a better
224
+ [1449.320 --> 1454.840] understanding about what it is, what makes up nonverbal communication. I hope you got some ideas
225
+ [1454.840 --> 1459.160] on things you can use to teach your kids how to be better nonverbal communicators.
226
+ [1461.160 --> 1468.840] Now I know that this was a lot of information and I have created a resource, a teaching guide
227
+ [1469.320 --> 1476.040] that I would love for you to have that walks you through teaching these different types of nonverbal
228
+ [1476.040 --> 1481.880] communication. I literally was reading off of it today as I was going over it with you so you
229
+ [1481.880 --> 1488.920] know what is in it and it's going to give you some words to help you teach. It's going to give you
230
+ [1488.920 --> 1495.160] some visuals. It's going to give you a strategy and a place to start and it's going to help you
231
+ [1495.160 --> 1501.160] teach these skills in a really strategic way. So if you're interested in purchasing this for me,
232
+ [1501.160 --> 1507.960] there is a link in the description below. Additionally, I have a whole bundle of teaching guides
233
+ [1507.960 --> 1514.040] that teach social communication skills. This is included in it and all of my teaching guides are
234
+ [1514.040 --> 1519.000] included in it. So it helps you, it's full of guides that help you teach things like taking
235
+ [1519.000 --> 1524.360] someone's perspective, code switching, power relationships, conversation skills, friendship
236
+ [1524.360 --> 1531.320] making skills. I have teaching guides to help you teach these skills to your students.
237
+ [1531.320 --> 1538.520] So there's a link for that in the description below as well. Thank you again, thank you for taking
238
+ [1538.520 --> 1542.760] your time to spend with me. Thank you for taking the time to learn something new. I hope you found
239
+ [1542.760 --> 1547.880] it helpful. If you'd like to keep getting videos like this or knowing when some new ones come out,
240
+ [1547.880 --> 1552.920] click subscribe and be a part of our community. Thanks!
transcript/allocentric_uxBeSEughAc.txt ADDED
@@ -0,0 +1,54 @@
1
+ [0.000 --> 1.700] I don't want you to have to.
2
+ [1.700 --> 4.700] You don't want to have to.
3
+ [4.700 --> 5.700] You don't want to have to.
4
+ [5.700 --> 8.380] I find the Ariana much more fragrance than the Raffialla.
5
+ [8.380 --> 10.180] Ariana, oh!
6
+ [10.180 --> 12.020] I'll keep that in mind.
7
+ [12.020 --> 12.780] How you doing?
8
+ [12.780 --> 14.300] Fine.
9
+ [14.300 --> 17.420] I never got a chance to thank you for holding the camp bus.
10
+ [17.420 --> 18.320] Oh, please, please.
11
+ [18.320 --> 20.700] Anytime you need a bus, I am your guy.
12
+ [20.700 --> 22.460] And I noticed Aaron's teeth are looking good.
13
+ [22.460 --> 23.700] Thanks to you.
14
+ [23.700 --> 26.500] I just hope she's remembering to wear her night retainer.
15
+ [26.500 --> 27.600] Well, you know how kids are.
16
+ [27.600 --> 28.420] I can't.
17
+ [28.420 --> 30.620] Oh, please, the minute my Kevin gets off that bus,
18
+ [30.620 --> 32.260] it's goodbye by play.
19
+ [32.260 --> 33.340] Is that for you?
20
+ [33.340 --> 34.900] Oh, yes.
21
+ [34.900 --> 36.780] When Deadrun and I got divorced, I decided
22
+ [36.780 --> 38.340] that I had to learn how to cook.
23
+ [38.340 --> 38.860] Huh.
24
+ [38.860 --> 40.860] The walkie world of Thai cooking.
25
+ [40.860 --> 42.500] I'm branching out.
26
+ [42.500 --> 44.780] No matter the fact, I'm taking a Thai cooking course
27
+ [44.780 --> 45.580] this summer.
28
+ [45.580 --> 46.660] Really?
29
+ [46.660 --> 48.820] I've always been interested in Asian cooking.
30
+ [48.820 --> 49.660] Really?
31
+ [49.660 --> 50.420] Mm-hmm.
32
+ [50.420 --> 51.260] Well, why don't you join me?
33
+ [54.260 --> 57.220] When I think about it, over the years,
34
+ [57.220 --> 59.900] there were less and less moments in the course of the day
35
+ [59.900 --> 63.620] when Ben and I actually made real eye contact.
36
+ [63.620 --> 69.980] If you are not going to share what he's almost asleep.
37
+ [69.980 --> 72.340] Maybe it was the stuff of life.
38
+ [72.340 --> 74.660] Who's going to take Aaron to school?
39
+ [74.660 --> 78.180] Who's turn is it to pick up Josh from his clarinet lessons?
40
+ [78.180 --> 80.660] But after a while, there was a disturbing comfort
41
+ [80.660 --> 82.620] and not really having to deal with each other.
42
+ [82.620 --> 85.660] Because somehow, you just get used to the disconnection.
43
+ [87.340 --> 89.660] And even at night, when we could finally come together,
44
+ [89.660 --> 91.420] we wound up facing forward.
45
+ [91.420 --> 94.540] Yeah, we were tired, but I think we were afraid
46
+ [94.540 --> 98.140] that if we faced each other, there'd be nothing there.
47
+ [98.140 --> 100.420] We're learning me-crab next week.
48
+ [100.420 --> 103.140] Me-crab?
49
+ [103.140 --> 104.460] I'll let you know.
50
+ [104.460 --> 105.980] OK.
51
+ [105.980 --> 108.460] Oh, no.
52
+ [108.460 --> 109.380] I'll call you.
53
+ [109.380 --> 110.260] Oh, you call me.
54
+ [110.260 --> 111.260] OK.
transcript/allocentric_wW7Z52plM0s.txt ADDED
@@ -0,0 +1,47 @@
1
+ [0.000 --> 12.000] Nonverbal communication differences occur between cultures because of how different people around the world interpret actions in social interaction.
2
+ [12.000 --> 23.000] Understanding the cultural differences in nonverbal communication is important for those with a goal to work in international business.
3
+ [23.000 --> 34.000] Types of nonverbal communication vary based on culture and country, but the areas of differences tend to fall within the following eight areas.
4
+ [34.000 --> 47.000] Each provides an area where people doing business in other parts of the world should understand the nonverbal communication differences between cultures and how to prepare for them.
5
+ [48.000 --> 60.000] I contact. I contact signals confidence in the West, what can be seen as rude or challenging in parts of Asia and the Middle East.
6
+ [60.000 --> 76.000] Also, there are gender rules in I contact around many Eastern cultures discouraging women to make I contact with men as a conveys authority or a sexual interest.
7
+ [78.000 --> 93.000] Touch touch often is used frequently in communication even in a business setting with customs such as a handshake, but other cultures consider touching other people inappropriate.
8
+ [93.000 --> 105.000] Those who live in Asia tend to take a more conservative approach when it comes to touching with a bow typically replacing a handshake.
9
+ [105.000 --> 111.000] Another example of differences with touching is a patting someone in the head.
10
+ [111.000 --> 124.000] In the US, it is seen as endearing and shows affection with children, but in some Asian cultures touching children or adults in the head is disrespectful.
11
+ [124.000 --> 135.000] The US is more conservative in other areas such as not kissing on the cheek as they do in many other parts of Europe.
12
+ [135.000 --> 145.000] Gestures. Many business people who work internationally discipline themselves to keep hand gestures to a minimum.
13
+ [145.000 --> 156.000] For example, pointing at someone else is an insult in most parts of the world, but in some places it is often simply used as a reference.
14
+ [156.000 --> 165.000] Organizations stick out their tongue to greet people, but that doesn't fly in other parts of the world.
15
+ [165.000 --> 174.000] The most common gesture in the world is a nod, but even that can mean different things in other cultures.
16
+ [174.000 --> 177.000] Physical distance.
17
+ [178.000 --> 188.000] The rule of thumb here, no matter where you are in the world, is to give people more space than you think they might need.
18
+ [188.000 --> 191.000] Only get close if invited.
19
+ [191.000 --> 196.000] People in different cultures abuse physical distance differently.
20
+ [197.000 --> 203.000] However, it's best never to enter anyone's personal space about two feet.
21
+ [203.000 --> 212.000] If it's not personal intimacy, many people find such proximity and comfortable.
22
+ [212.000 --> 215.000] Facial expressions.
23
+ [215.000 --> 222.000] The good news is that facial expressions for happiness, sadness, anger and fear are universal.
24
+ [222.000 --> 229.000] The bad news is that not every culture is okay with using them in a business setting.
25
+ [229.000 --> 239.000] The Japanese, for example, try to remain a neutral facial expression, believing that showing your emotions burdens the other person.
26
+ [239.000 --> 242.000] Appearance.
27
+ [242.000 --> 246.000] Conservative attire is the safest bet.
28
+ [246.000 --> 259.000] For some places in the United States are fine with very relaxed appearances, while others even see an exposed shoulder or leg asked a cause for offense.
29
+ [259.000 --> 264.000] The best choice is to simply dress conservatively.
30
+ [264.000 --> 271.000] You can also loosen up your wardrobe if it becomes apparent that it is acceptable.
31
+ [272.000 --> 274.000] Posture.
32
+ [274.000 --> 278.000] Again, the traditional route is the best route.
33
+ [278.000 --> 283.000] Don't slouch when sitting or sit with legs crossed.
34
+ [283.000 --> 291.000] Face people as they speak to you and not enough to show that you are paying attention to what they say.
35
+ [291.000 --> 296.000] Stay mindful of where you sit in meetings.
36
+ [296.000 --> 304.000] In some cultures there is a strict hierarchy for who gets to sit where.
37
+ [304.000 --> 307.000] Parallel language.
38
+ [307.000 --> 314.000] Parallel language refers to communication that is avocalized but not words.
39
+ [314.000 --> 321.000] This includes the tone of voice, loudness, speed of speech and inflection.
40
+ [321.000 --> 328.000] Parallel language is the key to understand the context or meaning of the words used.
41
+ [328.000 --> 341.000] It's important to be mindful of these issues and to understand they are not discernible in emails and texts, so great care must be used in the words you choose.
42
+ [341.000 --> 345.000] High context versus low context.
43
+ [345.000 --> 356.000] Another way to make sense of cultural differences in nonverbal communication is to understand the difference between high context and low context cultures.
44
+ [356.000 --> 363.000] High context cultures rely more on nonverbal communication than low context cultures.
45
+ [363.000 --> 371.000] They use personal relationships, social hierarchies and cultural knowledge to convey meaning.
46
+ [371.000 --> 384.000] In low context cultures words are more important. Communication is direct. Relationships begin and end quickly and hierarchies are relaxed.
47
+ [384.000 --> 395.000] For those who aspire to work in an international business, understanding these nonverbal communication differences between cultures is the key to success.
transcript/allocentric_xPiRQ1G241k.txt ADDED
@@ -0,0 +1,127 @@
1
+ [0.000 --> 6.240] My name is Ryan Peters. I'm a post-doctoral researcher working with Chen Yu at the University of Texas at Austin,
2
+ [6.240 --> 12.240] and today I'm presenting our paper titled, Are You With Me? Modeling Joint Attention from Child Egocentric Vision.
3
+ [12.240 --> 18.440] Human cognition is, in many ways, a cooperative social process.
4
+ [18.440 --> 26.840] And one of the key mechanisms that enables such social cognition is when we coordinate and share our attention to an object or task.
5
+ [27.240 --> 33.080] A huge body of work shows that such shared or joint attention is crucial for early development.
6
+ [33.080 --> 36.720] It's important for early language learning and other kinds of cultural transmission.
7
+ [36.720 --> 41.560] It predicts developmental outcomes, including language, cognitive and self-regulation abilities.
8
+ [41.560 --> 48.760] And because of these things, it's become a widely used diagnostic marker and target for clinical interventions.
9
+ [48.760 --> 54.760] From the beginning, gaze following has been seen as a kind of holy grail of joint attention.
10
+ [54.760 --> 62.520] Indeed, in Scaife and Bruner's pioneering work, joint attention was equivalent to and operationalized as gaze following.
11
+ [62.520 --> 70.520] Because of that, much of the subsequent work has been designed to elicit and measure how well infants follow a social partner's gaze.
12
+ [70.520 --> 79.160] In nearly all this work, looking at the social partner's face has been interpreted as the primary behavioral pathway to check a partner's attentional state.
13
+ [79.160 --> 87.560] And therefore, face looks are deemed as a kind of indicator of awareness of being in joint attention with a social partner.
14
+ [87.560 --> 97.800] However, several recent studies have shown that infants rarely look at their parents' faces during naturalistic parent-child activities such as toy play or meal preparation.
15
+ [97.800 --> 107.400] Since infants aren't looking at their parents' faces in these studies, they instead define joint attention simply as looking at the same object at the same time.
16
+ [107.480 --> 119.800] Crucially, these studies still find predictive relations between patterns of joint attention and vocabulary development, suggesting that awareness may not be a necessary component of joint attention.
17
+ [119.800 --> 128.200] However, this implication assumes that face looks are the only pathway to achieve awareness of joint attention.
18
+ [128.200 --> 135.800] Here, we challenge that assumption and ask whether children can achieve awareness of joint attention without looking at their parents' face.
19
+ [135.880 --> 142.760] Building on recent work showing that attending to held objects plays a critical role in establishing and maintaining joint attention,
20
+ [142.760 --> 147.240] we hypothesize that hand-eye coordination may provide an alternate pathway
21
+ [147.240 --> 149.640] to gaining awareness of joint attention.
22
+ [149.640 --> 153.640] To explore this hypothesis, we use a classification approach that combines
23
+ [153.640 --> 157.000] head mounted eye tracking and computational modeling.
24
+ [157.000 --> 162.840] First, we brought parent-child dyads in to play with a set of 24 toys in a naturalistic environment.
25
+ [162.920 --> 168.600] Then, using the eye trackers, we collected their egocentric views and gaze data, as you see here.
26
+ [168.600 --> 176.840] Next, using that gaze data, we categorize the child egocentric views as belonging to a moment of joint attention or not.
27
+ [176.840 --> 179.800] Here are some of the child egocentric views from the dataset.
28
+ [179.800 --> 183.320] Half of these are from moments of joint attention and half are not.
29
+ [183.320 --> 189.960] Determining which is which based solely on features in a single egocentric view appears to be a non-trivial task.
30
+ [189.960 --> 193.160] But this is precisely what we set out to train our models to do.
31
+ [194.520 --> 203.720] To do so, we fed the child egocentric videos into CNN models and provided the ground truth classifications to train them to classify the images.
32
+ [203.720 --> 206.440] We then tested the models using held out data.
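As an aside for readers: the talk does not specify the architecture or training details, so the following is only a minimal illustrative sketch of a binary "joint attention vs. not" frame classifier, assuming an ImageNet-pretrained ResNet-18 backbone, a hypothetical dataset yielding (image, label) pairs, and standard PyTorch training; none of these specifics come from the paper.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models

def build_model() -> nn.Module:
    # Fine-tune an ImageNet-pretrained ResNet-18 down to a single logit
    # (joint attention vs. not). The architecture choice is an assumption.
    model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
    model.fc = nn.Linear(model.fc.in_features, 1)
    return model

def train(model, train_ds, epochs=5, lr=1e-4, device="cuda"):
    # train_ds is assumed to yield (image_tensor, label) pairs, label in {0, 1},
    # where 1 marks a frame from a ground-truth joint attention moment.
    loader = DataLoader(train_ds, batch_size=64, shuffle=True)
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.BCEWithLogitsLoss()
    model.to(device).train()
    for _ in range(epochs):
        for images, labels in loader:
            images, labels = images.to(device), labels.float().to(device)
            loss = loss_fn(model(images).squeeze(1), labels)
            opt.zero_grad()
            loss.backward()
            opt.step()
    return model

@torch.no_grad()
def predict_confidences(model, test_ds, device="cuda"):
    # Per-frame confidence scores in [0, 1] on held-out frames.
    loader = DataLoader(test_ds, batch_size=64)
    model.to(device).eval()
    return torch.cat([torch.sigmoid(model(x.to(device)).squeeze(1)).cpu()
                      for x, _ in loader])

Any comparable image classifier would satisfy the description in the talk; the hyperparameters above are placeholders.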
33
+ [207.960 --> 211.640] Moving on to the first set of results, we first addressed the question,
34
+ [211.640 --> 217.640] do children's egocentric views contain in-the-moment signals allowing for awareness of joint attention?
35
+ [218.200 --> 223.400] To test this, we explore whether models can classify images as joint attention or not,
36
+ [223.400 --> 224.680] better than chance.
37
+ [224.680 --> 229.240] And one-sample, two-tailed t-tests comparing model-level, subject-level,
38
+ [229.240 --> 233.640] and item-level mean balanced accuracy against chance confirm our hypothesis.
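For concreteness, here is a minimal sketch of that kind of test, assuming per-unit (model, subject, or item) label arrays and using scikit-learn's balanced accuracy with SciPy's one-sample, two-tailed t-test against chance (0.5); this is a reconstruction, not the authors' analysis code.

import numpy as np
from scipy.stats import ttest_1samp
from sklearn.metrics import balanced_accuracy_score

def balanced_accuracies(y_true_by_unit, y_pred_by_unit):
    # Each argument: dict mapping a unit id (a model seed, subject, or item)
    # to arrays of true / predicted labels for that unit's test frames.
    return np.array([balanced_accuracy_score(y_true_by_unit[u], y_pred_by_unit[u])
                     for u in y_true_by_unit])

def test_against_chance(accuracies, chance=0.5):
    # One-sample t-test of mean balanced accuracy against chance;
    # scipy's ttest_1samp is two-tailed by default.
    t, p = ttest_1samp(accuracies, popmean=chance)
    return t, p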
39
+ [235.720 --> 239.400] To better understand our model's performance, we also look at the ROC curve,
40
+ [239.400 --> 244.280] which characterizes overall model performance by plotting inverse specificity,
41
+ [244.280 --> 249.640] or how well the models can detect moments that are not joint attention, along the x-axis,
42
+ [249.640 --> 255.880] against sensitivity, or how well the models can detect moments that are joint attention, along the y-axis.
43
+ [255.880 --> 262.280] And this is done for the full range of threshold values used to binarize the confidence scores output by the models.
44
+ [262.280 --> 267.400] Confidence scores range from 0 to 1, and this black point in the center here
45
+ [267.400 --> 272.200] marks the threshold of 0.5, which we used to generate our results.
46
+ [272.200 --> 276.680] So at this point, frames with confidence scores above 0.5 are classified as
47
+ [276.680 --> 281.880] joint attention and those with confidence scores below 0.5 are classified as not joint attention.
48
+ [281.880 --> 287.160] The bottom left-hand corner marks the threshold of 1 for which every frame would be categorized as not
49
+ [287.160 --> 291.640] joint attention, while the top right-hand corner marks the threshold of 0 for which every frame would
50
+ [291.640 --> 293.400] be categorized as joint attention.
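As a brief aside, and not the authors' code, computing such a curve, its area under the curve, and the 0.5 operating point from per-frame confidence scores could be sketched with scikit-learn as follows:

import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score

def roc_summary(y_true, confidences, threshold=0.5):
    # y_true: 1 = joint attention frame, 0 = not; confidences: scores in [0, 1].
    y_true = np.asarray(y_true)
    scores = np.asarray(confidences)
    fpr, tpr, _ = roc_curve(y_true, scores)   # inverse specificity vs. sensitivity
    auc = roc_auc_score(y_true, scores)       # the talk reports roughly 0.67
    y_pred = (scores >= threshold).astype(int)
    sensitivity = (y_pred[y_true == 1] == 1).mean()   # detection of JA frames
    specificity = (y_pred[y_true == 0] == 0).mean()   # detection of not-JA frames
    return fpr, tpr, auc, sensitivity, specificity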
51
+ [293.400 --> 299.480] Finally, this dashed line along the diagonal shows performance for a random classifier.
52
+ [299.480 --> 304.840] So the fact that our curve lies above the diagonal confirms our models perform better than
53
+ [304.840 --> 310.840] chance across the full range of threshold values. The area under the curve, or the ROC AUC,
54
+ [310.840 --> 316.680] puts a number to this, indicating there's a 67% probability that our models will produce a higher
55
+ [316.680 --> 322.200] confidence score for a randomly chosen joint attention frame than a randomly chosen frame from a moment
56
+ [322.200 --> 327.880] that is not joint attention. To explore the role of object holding, we also classify each frame as
57
+ [327.880 --> 333.560] belonging to different visible holding categories based on a combination of manually coded child and
58
+ [333.560 --> 339.160] parent object holding using the third person videos and automated object detections to determine
59
+ [339.160 --> 345.720] the visibility of objects in a child's egocentric use. Using these, we can compare how the model classwise
60
+ [345.720 --> 350.680] frames are which neither the child nor parent were holding a visible object versus frames in which
61
+ [350.680 --> 356.440] only the child, only the parent or both the child and parent were jointly holding the same visible object.
62
+ [357.160 --> 362.920] These last three holding categories all require that there is only a single held visible object,
63
+ [362.920 --> 367.560] allowing for a clean line of reasoning as to why such views might support detection of joint
64
+ [367.560 --> 372.120] attention. However, that is not always the case. There are frames in which the child or parent are
65
+ [372.120 --> 375.640] holding two visible objects, or frames in which the child and parent are each holding different
66
+ [375.640 --> 381.000] visible objects. So we created a separate category for frames with such conflicting holding cues.
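To make the categorization concrete, here is a hypothetical sketch of how per-frame holding codes and egocentric object-visibility detections might be combined into the five categories just described; the inputs and their format are assumptions, not the paper's actual pipeline.

def holding_category(child_held, parent_held, visible):
    # child_held / parent_held: sets of object ids manually coded as held
    # (from the third-person video); visible: set of object ids detected in the
    # child's egocentric view for this frame. All names are illustrative.
    child_vis = child_held & visible
    parent_vis = parent_held & visible
    if not child_vis and not parent_vis:
        return "neither"
    if child_vis == parent_vis and len(child_vis) == 1:
        return "both"        # child and parent jointly hold the same visible object
    if len(child_vis | parent_vis) == 1:
        return "only_child" if child_vis else "only_parent"
    return "conflicting"     # multiple held visible objects, or different ones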
67
+ [381.880 --> 386.120] Moving on to the results here, we address the question, does object holding provide in the moment
68
+ [386.120 --> 392.680] signals useful for detecting joint attention? Our hypotheses are that models will leverage signals
69
+ [392.680 --> 399.800] tied to visible holding to detect moments of joint attention and signals tied to the lack of visible
70
+ [399.800 --> 405.640] object holding to detect moments that are not joint attention. Focusing on the first hypothesis,
71
+ [405.640 --> 410.920] we again look at sensitivity, or how well the models detect joint attention moments, for each
72
+ [411.000 --> 416.200] of the visible holding categories, neither, only child, only parent, both, and conflicting, along the
73
+ [416.200 --> 423.240] x-axis here. And pairwise comparisons across the five categories reveal that models show greater
74
+ [423.240 --> 428.360] sensitivity for frames with visible held objects, and in particular, those in which both child and
75
+ [428.360 --> 436.040] parent are jointly holding an object, confirming our hypothesis. Next, focusing on the second hypothesis,
76
+ [436.040 --> 440.520] we again look at specificity or how well the models do at detecting moments that are not
77
+ [440.520 --> 445.160] joint attention for each of the holding categories, and here pairwise comparisons across the categories
78
+ [445.160 --> 450.360] reveal that models show greatest specificity for moments in which neither child, nor parent,
79
+ [450.360 --> 456.520] are holding a visible object, again, confirming our hypothesis. Finally, we can look at the RLC curves
80
+ [456.520 --> 460.760] for each of the holding categories, providing overall estimates of how well the models do for
81
+ [460.760 --> 465.640] each of the categories. And again, the points show the values associated with the threshold of 0.5
82
+ [465.720 --> 471.160] used to generate our results. And as you can see, the models do better than chance for all the
83
+ [471.160 --> 478.360] holding categories, but they're most accurate for frames in which both child and parent are jointly
84
+ [478.360 --> 487.640] holding a visible object and struggle with frames containing conflicting holding cues. So we see
85
+ [487.640 --> 493.080] that models are indeed able to classify joint attention better than chance, and they leverage
86
+ [493.160 --> 499.080] visible object holding to do this. Taken together, we think this confirms our overarching hypothesis that
87
+ [499.080 --> 504.600] children might be able to achieve awareness of their parent's attentional state by leveraging
88
+ [504.600 --> 510.520] in the moment visual signals tied to object holding. However, one major difference between our models
89
+ [510.520 --> 515.560] and children is that our models had a training signal. We told them what frames were and were not
90
+ [515.560 --> 521.960] joint attention, but what could be the training signal for children? In study 2, we address this question.
91
+ [522.760 --> 528.920] Based on study 1, we know that visual signals tied to object holding can be used to detect moments
92
+ [528.920 --> 535.560] of joint attention. Next, we wondered what if children simply assume they are in joint attention
93
+ [535.560 --> 541.240] when they look at an object held by themselves or their parents? In other words, what if children can
94
+ [541.240 --> 547.000] leverage their own attentional state in conjunction with object holding as a training signal to learn
95
+ [547.240 --> 553.160] to detect moments of joint attention? To explore this hypothesis, we trained three different models with
96
+ [553.160 --> 558.200] joint attention defined using a combination of object holding and child attention. One model was
97
+ [558.200 --> 562.840] trained using a dataset consisting of frames in which only the child was holding a visible object,
98
+ [562.840 --> 567.400] and for which frames in which the child was attending to the held object were defined as joint
99
+ [567.400 --> 572.760] attention and frames in which the child was not attending to the held object were defined as not
100
+ [573.000 --> 577.800] joint attention. A second model was similarly trained using only parent frames, and a third model
101
+ [577.800 --> 584.120] was trained using either only child or only parent frames. After training, we then tested the models
102
+ [584.120 --> 589.240] on ground truth joint attention, exactly as was done for the models in study 1.
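Purely as an illustration of that self-generated training signal, the labeling rule behind the three Study 2 models might be sketched as follows; the record fields and condition names are hypothetical, not taken from the paper.

def pseudo_label(frame, condition):
    # frame: hypothetical record with
    #   frame.child_holding / frame.parent_holding: id of the single held object, or None
    #   frame.child_gaze_target: id of the object the child is fixating, or None
    # condition: "child_only", "parent_only", or "either" (only child OR only parent holding).
    child, parent = frame.child_holding, frame.parent_holding
    if condition == "child_only":
        held = child if parent is None else None
    elif condition == "parent_only":
        held = parent if child is None else None
    else:  # "either": exactly one of the two is holding
        held = (child or parent) if (child is None) != (parent is None) else None
    if held is None:
        return None   # frame is excluded from training in this condition
    return int(frame.child_gaze_target == held)   # 1 = assumed joint attention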
103
+ [590.920 --> 594.840] Moving on to the results, since we're asking whether models can learn to detect joint attention,
104
+ [594.840 --> 600.280] our primary hypothesis is that models will show greater than chance sensitivity for the trained
105
+ [600.360 --> 606.600] categories. However, we also wondered whether the models might be able to generalize what they
106
+ [606.600 --> 612.280] learned to other object holding categories and maybe even show similar patterns of sensitivity
107
+ [612.280 --> 618.200] across holding categories to models trained on ground truth joint attention. So here are the
108
+ [618.200 --> 623.160] results for each of the models and we see that all three show greater than chance sensitivity
109
+ [623.240 --> 630.280] for the trained categories marked in blue, red, green respectively, confirming our first hypothesis.
110
+ [631.240 --> 636.520] Next, looking at how the models generalize across holding categories and how those distributions
111
+ [636.520 --> 641.800] compare with what we saw in study 1 in the upper left-hand corner here, we see that all three models
112
+ [641.800 --> 647.240] show striking similarities with the models from study 1. In particular, all models show greatest
113
+ [647.240 --> 652.680] sensitivity for frames in which both parent and child are jointly holding a visible object.
114
+ [653.160 --> 657.800] And lowest or nearly lowest sensitivity for frames in which neither parent nor child are holding
115
+ [657.800 --> 663.640] an object, confirming our second hypothesis. It's worth noting that the model trained on frames with
116
+ [663.640 --> 668.600] either child or parent holding shows the greatest similarity highlighting the importance of having
117
+ [668.600 --> 675.800] access to both types of cues. To conclude, our results broadly show that children might be able to
118
+ [675.800 --> 681.880] achieve awareness of joint attention by leveraging in the moment visual signals tied to object holding,
119
+ [681.880 --> 687.080] and children can theoretically generate their own training signal and learn to detect moments of
120
+ [687.080 --> 691.800] joint attention simply by assuming they're in joint attention when they look at an object held by
121
+ [691.800 --> 696.360] themselves or their parents. In other words, face looks are not the only way to gain awareness of
122
+ [696.360 --> 703.080] joint attention in real time in social situations. There are complementary social signals encoded in
123
+ [703.080 --> 708.840] bodily behaviors such as attending to objects held by oneself or a social partner. All together,
124
+ [708.840 --> 713.480] I think this work is a good case study demonstrating how things that we study at the social level
125
+ [713.480 --> 719.640] can be grounded and embedded in the sensory motor level. In other words, social and sensory motor levels
126
+ [719.640 --> 726.040] provide complementary rather than competing explanations. Here are my references. And to conclude,
127
+ [726.040 --> 736.040] I want to thank everyone at UT Austin and Indiana University who made this work possible.
video/TED_-FOCpMAww28.f140.m4a ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ee4e6d7ce68266ecbed30cc83308d2f66757896dbe3722e69862721dce18178
3
+ size 7714516
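The video entries that follow are Git LFS pointer files rather than the media itself: each records the spec version, a sha256 object id, and the payload size in bytes. As a small, hypothetical illustration (this helper is not part of the repository), such a pointer can be parsed like this:

def parse_lfs_pointer(text: str) -> dict:
    # Split the "key value" lines of a pointer file into a small record.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],                     # e.g. https://git-lfs.github.com/spec/v1
        "sha256": fields["oid"].removeprefix("sha256:"),  # hash used to fetch the actual blob
        "size": int(fields["size"]),                      # payload size in bytes
    }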
video/TED_-FOCpMAww28.f247.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23def1a6d9065ed6bd2c47f04b20565dd79db74a2677c2d039cdddc328ff2e6b
3
+ size 31918601
video/TED_-FOCpMAww28.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ac681b558a40d6a6d40f2079b61a0733dc9f0c707cb00f33a06cc43cf85cc61
3
+ size 39800301
video/TED_1zpf8H_Dd40.f140.m4a ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ecfdb144906245e02f8843c08e5eb21541ee0a131140cf45de8dc1db4e00797c
3
+ size 20215212
video/TED_1zpf8H_Dd40.f616.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d21dd11cae246d6d5dbce324732b5e9bbf395f80c5bdbed1b61d7f21c35834a
3
+ size 342801378
video/TED_1zpf8H_Dd40.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fd41c1ce27e2513a5b837be900b25351f1d1c7c7ae4a5592e085eb7294a4b1e
3
+ size 363265505
video/TED_4TQETLZZmcM.f140.m4a ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7bb64ed1ff97f2f41e05d49ebff9b17320e3478929c3adfaefec10a95748515
3
+ size 17911383
video/TED_4TQETLZZmcM.f248.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3593e23ed112ca23feea2301c9ccb735b92a20cdc1278992ebe224990936af48
3
+ size 132496271
video/TED_4TQETLZZmcM.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:632c5772f6b008fd84f8d1da5ebde7bb14168d00eec8aa766036748a3b287280
3
+ size 150775202
video/TED_4jwUXV4QaTw.f251.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e07cd39eb9d7bff0b2a05e8595bcc79c8f7715ee1be7d76a1150a6aac2d5f65
3
+ size 12394062
video/TED_4jwUXV4QaTw.f616.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ef6a93bd45e3d7acfdcac5f996a29d60424559d48b1fa56c521f03ba6abf168
3
+ size 150776044
video/TED_4jwUXV4QaTw.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5df6941601d36658eaeccc2d83e59255262c9f496c41855f60a1ea2b75844a7
3
+ size 163162334
video/TED_79HMPQj55yc.f251.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9af11db8d0b3a106df9a071dd54901e45fe3d69361191272bcaa9d771f44698b
3
+ size 7154659
video/TED_79HMPQj55yc.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18acb649c593a80331f04108e47212a369b79a297d32715c716f9ccb26cbadd9
3
+ size 80257133
video/TED_8S0FDjFBj8o.f251.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:503281e01abd1a6e4d9e839db76e58d600017bd2d1794babe9d69fda2ce55c5a
3
+ size 4564850
video/TED_8S0FDjFBj8o.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ec9fcd5ffb9dcaf40c61eb48d78cc878972cd59c74107ed8986f4569ad4821d
3
+ size 57425536
video/TED_E6NTM793zvo.f140.m4a ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c89f57b7dfe48f71aeeea42885ac10860a3a67b2cfe972639a16d1f8ab2807b
3
+ size 13373611
video/TED_E6NTM793zvo.f248.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb84785e323934ed93d4f0d647a38426268341b3767c35015301b9b123a9afef
3
+ size 81738550
video/TED_E6NTM793zvo.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b3c29509ac8e3d51331adfa79f719e835915183da857622e545e2f269d36434
3
+ size 95629085
video/TED_I5x1wQ6kHX0.f140.m4a ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abc2c3f5b2b2f503ebff1ad1f2446f32cde50a2b95e34d2b5b7e834b36f8c69b
3
+ size 26124429
video/TED_I5x1wQ6kHX0.f616.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46b3334dc939f0e59242faa06102516134e8b33a966a3dc87e766db0ab294c1d
3
+ size 290766826
video/TED_I5x1wQ6kHX0.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb1623e8a0e061e418c840d5af14c0674e973afa3a1539af7318abe5993462c1
3
+ size 317215930
video/TED_K0pxo-dS9Hc.f251.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7db90fbd2caf98a9a4f9eceb2544ff453443b3608b69eb903a3cac6ecfba5ff2
3
+ size 14449778
video/TED_K0pxo-dS9Hc.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6aa8297a1c294b2e59a74395258e403d3c7967d4db4d04a32eb9fe9bf58b32cd
3
+ size 542249785
video/TED_Ks-_Mh1QhMc.f247.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f12314ad5c4baba5b3e0669edae32f58266a70ca21382a6d155793678eefb76
3
+ size 85647656
video/TED_Ks-_Mh1QhMc.f251.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac2f76a46d8a9f08c8120259ea9802306ee21ff93cc42e7120177b5ebf88bfd3
3
+ size 19257916
video/TED_Ks-_Mh1QhMc.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7d436de282c13abc42be6dc28ad58d315977097ab953745ea1629d86d1b425c
3
+ size 104872593
video/TED_L9UIF852Boo.f251.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87ec89627ffbcf2fa5c82663f1b527f46122b091d76317028efdf4cf49e091aa
3
+ size 19795714
video/TED_L9UIF852Boo.f616.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:047033aca88a284b5f084006c25e0477929da6c73a10aea2cbe88b706c8d986f
3
+ size 619883404
video/TED_OyK0oE5rwFY.f248.webm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cde3c27c18420c011231f7a758917b63d64d422082be940be5ea467396486caf
3
+ size 4451648